From ea608cc36d26fb5b100630b98cbe28555dad8434 Mon Sep 17 00:00:00 2001
From: "kaf24@firebug.cl.cam.ac.uk" <kaf24@firebug.cl.cam.ac.uk>
Date: Fri, 21 Apr 2006 17:35:15 +0100
Subject: [PATCH] Pull the Linux percpu interface into Xen. Implemented for
 x86 and used it to eliminate the percpu_ctxt struct from arch/x86/domain.c.

Signed-off-by: Keir Fraser
---
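Notes (review aid only; everything between the '---' above and the
diffstat below is dropped by git-am):

A sketch of how the new interface reads at a call site. The variable
name 'irq_count' and both helper functions are made up for
illustration; only DEFINE_PER_CPU, DECLARE_PER_CPU, per_cpu() and
this_cpu() are introduced by this patch:

    /* Reserves one slot in .data.percpu; percpu_init_areas() then
     * replicates that master section once per CPU at boot. */
    static DEFINE_PER_CPU(unsigned long, irq_count);

    void count_irq_local(void)
    {
        /* Access this CPU's copy. The caller must prevent migration
         * across CPUs, e.g. by running with interrupts disabled. */
        this_cpu(irq_count)++;
    }

    unsigned long read_irq_count(unsigned int cpu)
    {
        /* Access another CPU's copy by explicit CPU number. */
        return per_cpu(irq_count, cpu);
    }
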
 xen/arch/x86/domain.c         | 38 ++++++++++++++--------------------
 xen/arch/x86/setup.c          | 39 ++++++++++++++++++++++++++++++++++-
 xen/arch/x86/smpboot.c        |  2 +-
 xen/arch/x86/x86_32/xen.lds.S | 10 ++++++++-
 xen/arch/x86/x86_64/xen.lds.S | 10 ++++++++-
 xen/include/asm-x86/current.h | 18 ++++++++++------
 xen/include/asm-x86/percpu.h  | 20 ++++++++++++++++++
 xen/include/xen/compiler.h    | 13 ++++++++++++
 xen/include/xen/percpu.h      | 15 ++++++++++++++
 9 files changed, 132 insertions(+), 33 deletions(-)
 create mode 100644 xen/include/asm-x86/percpu.h
 create mode 100644 xen/include/xen/percpu.h

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index a657fba2eb..e590abf347 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -21,6 +21,12 @@
 #include
 #include
 #include
+#include
+#include
+#include
+#include
+#include
+#include
 #include
 #include
 #include
@@ -30,22 +36,12 @@
 #include
 #include
 #include
-#include
-#include
 #include
-#include
-#include
 #include
 #include
 #include
-#include
-#include
 
-struct percpu_ctxt {
-    struct vcpu *curr_vcpu;
-    unsigned int dirty_segment_mask;
-} __cacheline_aligned;
-static struct percpu_ctxt percpu_ctxt[NR_CPUS];
+DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
 
 static void paravirt_ctxt_switch_from(struct vcpu *v);
 static void paravirt_ctxt_switch_to(struct vcpu *v);
@@ -123,11 +119,6 @@ void dump_pageframe_info(struct domain *d)
     }
 }
 
-void set_current_execstate(struct vcpu *v)
-{
-    percpu_ctxt[smp_processor_id()].curr_vcpu = v;
-}
-
 struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
 {
     struct vcpu *v;
@@ -459,6 +450,7 @@ void new_thread(struct vcpu *d,
  * save_segments() writes a mask of segments which are dirty (non-zero),
  * allowing load_segments() to avoid some expensive segment loads and
  * MSR writes. */
+static DEFINE_PER_CPU(unsigned int, dirty_segment_mask);
 #define DIRTY_DS 0x01
 #define DIRTY_ES 0x02
 #define DIRTY_FS 0x04
@@ -473,8 +465,8 @@ static void load_segments(struct vcpu *n)
     unsigned int dirty_segment_mask, cpu = smp_processor_id();
 
     /* Load and clear the dirty segment mask. */
-    dirty_segment_mask = percpu_ctxt[cpu].dirty_segment_mask;
-    percpu_ctxt[cpu].dirty_segment_mask = 0;
+    dirty_segment_mask = per_cpu(dirty_segment_mask, cpu);
+    per_cpu(dirty_segment_mask, cpu) = 0;
 
     /* Either selector != 0 ==> reload. */
     if ( unlikely((dirty_segment_mask & DIRTY_DS) | nctxt->user_regs.ds) )
@@ -601,7 +593,7 @@ static void save_segments(struct vcpu *v)
         dirty_segment_mask |= DIRTY_GS_BASE_USER;
     }
 
-    percpu_ctxt[smp_processor_id()].dirty_segment_mask = dirty_segment_mask;
+    this_cpu(dirty_segment_mask) = dirty_segment_mask;
 }
 
 #define switch_kernel_stack(v) ((void)0)
@@ -638,7 +630,7 @@ static void __context_switch(void)
 {
     struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
     unsigned int          cpu = smp_processor_id();
-    struct vcpu          *p = percpu_ctxt[cpu].curr_vcpu;
+    struct vcpu          *p = per_cpu(curr_vcpu, cpu);
     struct vcpu          *n = current;
 
     ASSERT(p != n);
@@ -692,7 +684,7 @@ static void __context_switch(void)
     cpu_clear(cpu, p->domain->domain_dirty_cpumask);
     cpu_clear(cpu, p->vcpu_dirty_cpumask);
 
-    percpu_ctxt[cpu].curr_vcpu = n;
+    per_cpu(curr_vcpu, cpu) = n;
 }
 
 
@@ -716,7 +708,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
 
     set_current(next);
 
-    if ( (percpu_ctxt[cpu].curr_vcpu == next) || is_idle_vcpu(next) )
+    if ( (per_cpu(curr_vcpu, cpu) == next) || is_idle_vcpu(next) )
     {
         local_irq_enable();
     }
@@ -758,7 +750,7 @@ int __sync_lazy_execstate(void)
 
     local_irq_save(flags);
 
-    switch_required = (percpu_ctxt[smp_processor_id()].curr_vcpu != current);
+    switch_required = (this_cpu(curr_vcpu) != current);
 
     if ( switch_required )
         __context_switch();
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 721b6781d8..70f2822524 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include <xen/percpu.h>
 #include
 #include
 #include
@@ -159,6 +160,38 @@ void discard_initial_images(void)
     init_domheap_pages(initial_images_start, initial_images_end);
 }
 
+extern char __per_cpu_start[], __per_cpu_data_end[], __per_cpu_end[];
+
+static void percpu_init_areas(void)
+{
+    unsigned int i, data_size = __per_cpu_data_end - __per_cpu_start;
+
+    BUG_ON(data_size > PERCPU_SIZE);
+
+    for ( i = 1; i < NR_CPUS; i++ )
+        memcpy(__per_cpu_start + (i << PERCPU_SHIFT),
+               __per_cpu_start,
+               data_size);
+}
+
+static void percpu_free_unused_areas(void)
+{
+    unsigned int i, first_unused;
+
+    /* Find first unused CPU number. */
+    for ( i = 0; i < NR_CPUS; i++ )
+        if ( !cpu_online(i) )
+            break;
+    first_unused = i;
+
+    /* Check that there are no holes in cpu_online_map. */
+    for ( ; i < NR_CPUS; i++ )
+        BUG_ON(cpu_online(i));
+
+    init_xenheap_pages(__pa(__per_cpu_start) + (first_unused << PERCPU_SHIFT),
+                       __pa(__per_cpu_end));
+}
+
 void __init __start_xen(multiboot_info_t *mbi)
 {
     char *cmdline;
@@ -209,6 +242,8 @@ void __init __start_xen(multiboot_info_t *mbi)
         EARLY_FAIL();
     }
 
+    percpu_init_areas();
+
     xenheap_phys_end = opt_xenheap_megabytes << 20;
 
     if ( mbi->flags & MBI_MEMMAP )
@@ -405,7 +440,7 @@ void __init __start_xen(multiboot_info_t *mbi)
     BUG_ON(idle_domain == NULL);
 
     set_current(idle_domain->vcpu[0]);
-    set_current_execstate(idle_domain->vcpu[0]);
+    this_cpu(curr_vcpu) = idle_domain->vcpu[0];
     idle_vcpu[0] = current;
 
     paging_init();
@@ -482,6 +517,8 @@ void __init __start_xen(multiboot_info_t *mbi)
     printk("Brought up %ld CPUs\n", (long)num_online_cpus());
     smp_cpus_done(max_cpus);
 
+    percpu_free_unused_areas();
+
     initialise_gdb(); /* could be moved earlier */
 
     do_initcalls();
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 81faef1f2a..d199041d30 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -531,7 +531,7 @@ void __devinit start_secondary(void *unused)
 
     set_processor_id(cpu);
     set_current(idle_vcpu[cpu]);
-    set_current_execstate(idle_vcpu[cpu]);
+    this_cpu(curr_vcpu) = idle_vcpu[cpu];
 
     percpu_traps_init();
 
diff --git a/xen/arch/x86/x86_32/xen.lds.S b/xen/arch/x86/x86_32/xen.lds.S
index fb76cbc73d..f58bd191f5 100644
--- a/xen/arch/x86/x86_32/xen.lds.S
+++ b/xen/arch/x86/x86_32/xen.lds.S
@@ -5,6 +5,7 @@
 
 #include
 #include
+#include <asm/percpu.h>
 #undef ENTRY
 #undef ALIGN
 
@@ -56,9 +57,16 @@ SECTIONS
   __initcall_start = .;
   .initcall.init : { *(.initcall.init) } :text
   __initcall_end = .;
-  . = ALIGN(STACK_SIZE);
+  . = ALIGN(PAGE_SIZE);
   __init_end = .;
 
+  __per_cpu_start = .;
+  .data.percpu : { *(.data.percpu) } :text
+  __per_cpu_data_end = .;
+  . = __per_cpu_start + (NR_CPUS << PERCPU_SHIFT);
+  . = ALIGN(STACK_SIZE);
+  __per_cpu_end = .;
+
   __bss_start = .;             /* BSS */
   .bss : {
         *(.bss.stack_aligned)
diff --git a/xen/arch/x86/x86_64/xen.lds.S b/xen/arch/x86/x86_64/xen.lds.S
index d8685201ab..92d395d5f9 100644
--- a/xen/arch/x86/x86_64/xen.lds.S
+++ b/xen/arch/x86/x86_64/xen.lds.S
@@ -3,6 +3,7 @@
 
 #include
 #include
+#include <asm/percpu.h>
 #undef ENTRY
 #undef ALIGN
 
@@ -54,9 +55,16 @@ SECTIONS
   __initcall_start = .;
   .initcall.init : { *(.initcall.init) } :text
   __initcall_end = .;
-  . = ALIGN(STACK_SIZE);
+  . = ALIGN(PAGE_SIZE);
   __init_end = .;
 
+  __per_cpu_start = .;
+  .data.percpu : { *(.data.percpu) } :text
+  __per_cpu_data_end = .;
+  . = __per_cpu_start + (NR_CPUS << PERCPU_SHIFT);
+  . = ALIGN(STACK_SIZE);
+  __per_cpu_end = .;
+
   __bss_start = .;             /* BSS */
   .bss : {
         *(.bss.stack_aligned)
diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h
index 5803141f2a..9b0b6e5245 100644
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -16,7 +16,7 @@ struct vcpu;
 struct cpu_info {
     struct cpu_user_regs guest_cpu_user_regs;
     unsigned int         processor_id;
-    struct vcpu         *current_ed;
+    struct vcpu         *current_vcpu;
 };
 
 static inline struct cpu_info *get_cpu_info(void)
@@ -29,12 +29,12 @@ static inline struct cpu_info *get_cpu_info(void)
     return cpu_info;
 }
 
-#define get_current()         (get_cpu_info()->current_ed)
-#define set_current(_ed)      (get_cpu_info()->current_ed = (_ed))
+#define get_current()         (get_cpu_info()->current_vcpu)
+#define set_current(vcpu)     (get_cpu_info()->current_vcpu = (vcpu))
 #define current               (get_current())
 
 #define get_processor_id()    (get_cpu_info()->processor_id)
-#define set_processor_id(_id) (get_cpu_info()->processor_id = (_id))
+#define set_processor_id(id)  (get_cpu_info()->processor_id = (id))
 
 #define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs)
 
@@ -51,8 +51,14 @@ static inline struct cpu_info *get_cpu_info(void)
             "mov %0,%%"__OP"sp; jmp "STR(__fn)      \
             : : "r" (guest_cpu_user_regs()) : "memory" )
 
-#define schedule_tail(_ed)    (((_ed)->arch.schedule_tail)(_ed))
+#define schedule_tail(vcpu)   (((vcpu)->arch.schedule_tail)(vcpu))
 
-extern void set_current_execstate(struct vcpu *v);
+#include <xen/percpu.h>
+/*
+ * Which VCPU's state is currently running on each CPU?
+ * This is not necessarily the same as 'current' as a CPU may be
+ * executing a lazy state switch.
+ */
+DECLARE_PER_CPU(struct vcpu *, curr_vcpu);
 
 #endif /* __X86_CURRENT_H__ */
diff --git a/xen/include/asm-x86/percpu.h b/xen/include/asm-x86/percpu.h
new file mode 100644
index 0000000000..d76206587f
--- /dev/null
+++ b/xen/include/asm-x86/percpu.h
@@ -0,0 +1,20 @@
+#ifndef __X86_PERCPU_H__
+#define __X86_PERCPU_H__
+
+#define PERCPU_SHIFT 12
+#define PERCPU_SIZE  (1UL << PERCPU_SHIFT)
+
+/* Separate out the type, so (int[3], foo) works. */
+#define DEFINE_PER_CPU(type, name)                      \
+    __attribute__((__section__(".data.percpu")))        \
+    __typeof__(type) per_cpu__##name
+
+/* var is in discarded region: offset to particular copy we want */
+#define per_cpu(var, cpu)  \
+    (*RELOC_HIDE(&per_cpu__##var, ((unsigned int)(cpu))<<PERCPU_SHIFT))
+#define __get_cpu_var(var) \
+    (per_cpu(var, smp_processor_id()))
+
+#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+
+#endif /* __X86_PERCPU_H__ */
diff --git a/xen/include/xen/compiler.h b/xen/include/xen/compiler.h
--- a/xen/include/xen/compiler.h
+++ b/xen/include/xen/compiler.h
@@ -40,2 +40,15 @@
 
+/*
+ * This macro obfuscates arithmetic on a variable address so that gcc
+ * shouldn't recognize the original var, and make assumptions about it.
+ *
+ * per_cpu() needs this: each CPU's copy of a per-CPU variable lies at
+ * a fixed offset from the master copy emitted into .data.percpu, i.e.
+ * outside the bounds of the object gcc knows about.
+ */
+#define RELOC_HIDE(ptr, off)                    \
+  ({ unsigned long __ptr;                       \
+     __asm__ ("" : "=r"(__ptr) : "0"(ptr));     \
+     (typeof(ptr)) (__ptr + (off)); })
+
 #endif /* __LINUX_COMPILER_H */
diff --git a/xen/include/xen/percpu.h b/xen/include/xen/percpu.h
new file mode 100644
--- /dev/null
+++ b/xen/include/xen/percpu.h
@@ -0,0 +1,15 @@
+#ifndef __XEN_PERCPU_H__
+#define __XEN_PERCPU_H__
+
+#include <xen/config.h>
+#include <xen/smp.h>
+#include <asm/percpu.h>
+
+/* Preferred on Xen. Also see arch-defined per_cpu(). */
+#define this_cpu(var)    __get_cpu_var(var)
+
+/* Linux compatibility. */
+#define get_cpu_var(var) this_cpu(var)
+#define put_cpu_var(var)
+
+#endif /* __XEN_PERCPU_H__ */
-- 
2.30.2
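
P.S. An editor's illustration, not part of the patch above: the address
arithmetic behind per_cpu(). Every CPU owns a (1 << PERCPU_SHIFT)-byte
area; CPU0's area is the master copy of .data.percpu that the linker
script places, percpu_init_areas() memcpy()s it to the other areas, and
per_cpu(var, cpu) resolves to "master address + (cpu << PERCPU_SHIFT)".
The standalone host-C demo below models that layout; per_cpu_area,
FOO_OFFSET and per_cpu_foo() are made-up stand-ins:

    #include <stdio.h>
    #include <string.h>

    #define PERCPU_SHIFT 12
    #define PERCPU_SIZE  (1UL << PERCPU_SHIFT)
    #define NR_CPUS      4

    /* Stand-in for the NR_CPUS areas the linker script reserves. */
    static char per_cpu_area[NR_CPUS << PERCPU_SHIFT];

    /* Stand-in for one DEFINE_PER_CPU(unsigned long, foo) slot at a
     * fixed offset within the master section. */
    #define FOO_OFFSET 0x40

    static unsigned long *per_cpu_foo(unsigned int cpu)
    {
        /* The per_cpu() arithmetic: master copy + per-CPU offset. */
        return (unsigned long *)(per_cpu_area + FOO_OFFSET +
                                 ((unsigned long)cpu << PERCPU_SHIFT));
    }

    int main(void)
    {
        unsigned int cpu;

        /* What percpu_init_areas() does: replicate the master area. */
        for ( cpu = 1; cpu < NR_CPUS; cpu++ )
            memcpy(per_cpu_area + ((unsigned long)cpu << PERCPU_SHIFT),
                   per_cpu_area, PERCPU_SIZE);

        /* Each "CPU" updates only its own copy... */
        for ( cpu = 0; cpu < NR_CPUS; cpu++ )
            *per_cpu_foo(cpu) = 100UL * cpu;

        /* ...and the copies sit exactly PERCPU_SIZE bytes apart. */
        for ( cpu = 0; cpu < NR_CPUS; cpu++ )
            printf("cpu%u: foo at %p = %lu\n",
                   cpu, (void *)per_cpu_foo(cpu), *per_cpu_foo(cpu));

        return 0;
    }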